\scriptsize
\begin{verbatim}
typedef struct shared_info {
- vcpu_info_t vcpu_info[MAX_VIRT_CPUS];
+ vcpu_info_t vcpu_info[XEN_LEGACY_MAX_VCPUS];
/*
* A domain can create "event channels" on which it can send and receive
return res;
}
-vcpu_guest_context_any_t vcpu_ctx_any[MAX_VIRT_CPUS];
+vcpu_guest_context_any_t *vcpu_ctx_any;
int vcpu_setcontext (int vcpu)
{
{
int ret;
struct sigaction sa;
-
- cur_ctx = &vcpu_ctx_any[vcpu].c;
+ xc_dominfo_t dominfo;
xc_handle = xc_interface_open (); /* for accessing control interface */
+ ret = xc_domain_getinfo (xc_handle, domid, 1, &dominfo);
+ if (ret < 0) {
+ perror ("xc_domain_getinfo");
+ exit (-1);
+ }
+
+ vcpu_ctx_any = calloc (dominfo.max_vcpu_id + 1, sizeof (*vcpu_ctx_any));
+ if (!vcpu_ctx_any) {
+ perror ("vcpu context array alloc");
+ exit (-1);
+ }
+ cur_ctx = &vcpu_ctx_any[vcpu].c;
+
if (xc_domain_setdebugging (xc_handle, domid, 1) != 0)
perror ("setdebugging");
# all archs
"xen_pfn_to_cr3",
- "MAX_VIRT_CPUS",
+ "XEN_LEGACY_MAX_VCPUS",
"MAX_GUEST_CMDLINE" ];
/* clear any pending events and the selector */
memset(&(shared_info->evtchn_pending[0]), 0,
sizeof (shared_info->evtchn_pending));
- for (i = 0; i < MAX_VIRT_CPUS; i++)
+ for (i = 0; i < XEN_LEGACY_MAX_VCPUS; i++)
shared_info->vcpu_info[i].evtchn_pending_sel = 0;
if (start_info_pfn != NULL)
/* vcpu map */
uint64_t *vcpumap = NULL;
- if (xc_ia64_send_vcpumap(xc_handle, io_fd, dom, info, MAX_VIRT_CPUS,
+ if (xc_ia64_send_vcpumap(xc_handle, io_fd, dom, info, XEN_LEGACY_MAX_VCPUS,
&vcpumap))
goto out;
return -1;
/* vcpu map */
- if (xc_ia64_send_vcpumap(xc_handle, io_fd, dom, info, MAX_VIRT_CPUS,
+ if (xc_ia64_send_vcpumap(xc_handle, io_fd, dom, info, XEN_LEGACY_MAX_VCPUS,
&vcpumap))
goto out;
int nr_vcpus = 0;
char *dump_mem, *dump_mem_start = NULL;
- vcpu_guest_context_any_t ctxt[MAX_VIRT_CPUS];
+ vcpu_guest_context_any_t *ctxt = NULL;
struct xc_core_arch_context arch_ctxt;
char dummy[PAGE_SIZE];
int dummy_len;
goto out;
}
+ ctxt = calloc(info.max_vcpu_id + 1, sizeof(*ctxt));
+ if ( !ctxt )
+ {
+ PERROR("Could not allocate vcpu context array for domain %u", domid);
+ goto out;
+ }
+
for ( i = 0; i <= info.max_vcpu_id; i++ )
{
if ( xc_vcpu_getcontext(xc_handle, domid, i, &ctxt[nr_vcpus]) == 0 )
xc_core_shdr_free(sheaders);
if ( strtab != NULL )
xc_core_strtab_free(strtab);
+ if ( ctxt != NULL )
+ free(ctxt);
if ( dump_mem_start != NULL )
free(dump_mem_start);
if ( live_shinfo != NULL )
void
xc_core_arch_context_init(struct xc_core_arch_context* arch_ctxt)
{
- int i;
-
arch_ctxt->mapped_regs_size =
(XMAPPEDREGS_SIZE < PAGE_SIZE) ? PAGE_SIZE: XMAPPEDREGS_SIZE;
arch_ctxt->nr_vcpus = 0;
- for ( i = 0; i < MAX_VIRT_CPUS; i++ )
- arch_ctxt->mapped_regs[i] = NULL;
+ arch_ctxt->mapped_regs = NULL;
xc_ia64_p2m_init(&arch_ctxt->p2m_table);
}
for ( i = 0; i < arch_ctxt->nr_vcpus; i++ )
if ( arch_ctxt->mapped_regs[i] != NULL )
munmap(arch_ctxt->mapped_regs[i], arch_ctxt->mapped_regs_size);
+ free(arch_ctxt->mapped_regs);
xc_ia64_p2m_unmap(&arch_ctxt->p2m_table);
}
errno = ENOENT;
return -1;
}
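+ /* Grow the mapped_regs pointer array geometrically: double it whenever
+  * nr_vcpus reaches a power of two, zeroing the newly added slots. */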
+ if ( !(arch_ctxt->nr_vcpus & (arch_ctxt->nr_vcpus - 1)) ) {
+ unsigned int nr = arch_ctxt->nr_vcpus ? arch_ctxt->nr_vcpus << 1 : 1;
+ mapped_regs_t** new = realloc(arch_ctxt->mapped_regs,
+ nr * sizeof(*new));
+
+ if ( !new )
+ {
+ PERROR("Could not alloc mapped regs pointer array");
+ return -1;
+ }
+ memset(new + arch_ctxt->nr_vcpus, 0,
+ (nr - arch_ctxt->nr_vcpus) * sizeof(*new));
+ arch_ctxt->mapped_regs = new;
+ }
+
mapped_regs = xc_map_foreign_range(xc_handle, domid,
arch_ctxt->mapped_regs_size,
PROT_READ, ctxt->privregs_pfn);
struct xc_core_arch_context {
size_t mapped_regs_size;
int nr_vcpus;
- mapped_regs_t* mapped_regs[MAX_VIRT_CPUS];
+ mapped_regs_t** mapped_regs;
struct xen_ia64_p2m_table p2m_table;
};
xc_dom_printf("%s: called\n", __FUNCTION__);
memset(shared_info, 0, sizeof(*shared_info));
- for (i = 0; i < MAX_VIRT_CPUS; i++)
+ for (i = 0; i < XEN_LEGACY_MAX_VCPUS; i++)
shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
shared_info->arch.start_info_pfn = dom->start_info_pfn;
shared_info->arch.memmap_info_num_pages = 1; //XXX
xc_dom_printf("%s: called\n", __FUNCTION__);
memset(shared_info, 0, sizeof(*shared_info));
- for ( i = 0; i < MAX_VIRT_CPUS; i++ )
+ for ( i = 0; i < XEN_LEGACY_MAX_VCPUS; i++ )
shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
return 0;
}
xc_dom_printf("%s: called\n", __FUNCTION__);
memset(shared_info, 0, sizeof(*shared_info));
- for ( i = 0; i < MAX_VIRT_CPUS; i++ )
+ for ( i = 0; i < XEN_LEGACY_MAX_VCPUS; i++ )
shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
return 0;
}
/* clear any pending events and the selector */
MEMSET_ARRAY_FIELD(new_shared_info, evtchn_pending, 0);
- for ( i = 0; i < MAX_VIRT_CPUS; i++ )
+ for ( i = 0; i < XEN_LEGACY_MAX_VCPUS; i++ )
SET_FIELD(new_shared_info, vcpu_info[i].evtchn_pending_sel, 0);
/* mask event channels */
size_t size, int prot, size_t chunksize,
privcmd_mmap_entry_t entries[], int nentries);
-void *map_domain_va_core(unsigned long domfd, int cpu, void *guest_va,
- vcpu_guest_context_any_t *ctxt);
-int xc_waitdomain_core(int xc_handle, int domain, int *status,
- int options, vcpu_guest_context_any_t *ctxt);
-
void bitmap_64_to_byte(uint8_t *bp, const uint64_t *lp, int nbits);
void bitmap_byte_to_64(uint64_t *lp, const uint8_t *bp, int nbits);
static uint64_t online_cpumap;
static uint64_t regs_valid;
-static vcpu_guest_context_any_t ctxt[MAX_VIRT_CPUS];
+static unsigned int nr_vcpu_ids;
+static vcpu_guest_context_any_t *ctxt;
#define FOREACH_CPU(cpumap, i) for ( cpumap = online_cpumap; (i = xc_ffs64(cpumap)); cpumap &= ~(1 << (index - 1)) )
return (cr0 & X86_CR0_PE) && (cr0 & X86_CR0_PG);
}
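+/* Make sure the lazily grown per-vcpu context array can hold nr_cpus
+ * entries; returns NULL if the reallocation fails. */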
+vcpu_guest_context_any_t *xc_ptrace_get_vcpu_ctxt(unsigned int nr_cpus)
+{
+ if (nr_cpus > nr_vcpu_ids) {
+ vcpu_guest_context_any_t *new;
+
+ new = realloc(ctxt, nr_cpus * sizeof(*ctxt));
+ if (!new)
+ return NULL;
+ ctxt = new;
+ nr_vcpu_ids = nr_cpus;
+ }
+
+ return ctxt;
+}
+
/*
* Fetch registers for all online cpus and set the cpumap
* to indicate which cpus are online
{
int i, online;
+ if (!xc_ptrace_get_vcpu_ctxt(d->max_vcpu_id + 1))
+ return -ENOMEM;
+
*cpumap = 0;
for (i = 0; i <= d->max_vcpu_id; i++) {
fetch_regs(xc_handle, i, &online);
case PTRACE_PEEKDATA:
if (current_isfile)
guest_va = (unsigned long *)map_domain_va_core(
- current_domid, cpu, addr, ctxt);
+ current_domid, cpu, addr);
else
guest_va = (unsigned long *)map_domain_va(
xc_handle, cpu, addr, PROT_READ);
/* XXX assume that all CPUs have the same address space */
if (current_isfile)
guest_va = (unsigned long *)map_domain_va_core(
- current_domid, cpu, addr, ctxt);
+ current_domid, cpu, addr);
else
guest_va = (unsigned long *)map_domain_va(
xc_handle, cpu, addr, PROT_READ|PROT_WRITE);
int options)
{
if (current_isfile)
- return xc_waitdomain_core(xc_handle, domain, status, options, ctxt);
+ return xc_waitdomain_core(xc_handle, domain, status, options);
return __xc_waitdomain(xc_handle, domain, status, options);
}
}
#endif
+void *map_domain_va_core(unsigned long domfd, int cpu, void *guest_va);
+int xc_waitdomain_core(int xc_handle, int domain, int *status, int options);
+vcpu_guest_context_any_t *xc_ptrace_get_vcpu_ctxt(unsigned int nr_cpus);
+
+
#endif /* XC_PTRACE */
#include <time.h>
#include <inttypes.h>
+static unsigned int max_nr_vcpus;
+static unsigned long *cr3;
+static unsigned long *cr3_phys;
+static unsigned long **cr3_virt;
+static unsigned long *pde_phys;
+static unsigned long **pde_virt;
+static unsigned long *page_phys;
+static unsigned long **page_virt;
+
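+/* Grow the per-vcpu translation caches (cr3, pde and page maps) in step
+ * with the ptrace vcpu context array; new entries start out zeroed. */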
+static vcpu_guest_context_t *
+ptrace_core_get_vcpu_ctxt(unsigned int nr_vcpus)
+{
+ if (nr_vcpus > max_nr_vcpus) {
+ void *new;
+
+#define REALLOC(what) \
+ new = realloc(what, nr_vcpus * sizeof(*what)); \
+ if (!new) \
+ return NULL; \
+ what = new; \
+ memset(what + max_nr_vcpus, 0, \
+ (nr_vcpus - max_nr_vcpus) * sizeof(*what))
+
+ REALLOC(cr3);
+ REALLOC(cr3_phys);
+ REALLOC(cr3_virt);
+ REALLOC(pde_phys);
+ REALLOC(pde_virt);
+ REALLOC(page_phys);
+ REALLOC(page_virt);
+
+#undef REALLOC
+ max_nr_vcpus = nr_vcpus;
+ }
+
+ return &xc_ptrace_get_vcpu_ctxt(nr_vcpus)->c;
+}
+
/* Leave the code for the old format as is. */
/* --- compatible layer for old format ------------------------------------- */
/* XXX application state */
static unsigned long *p2m_array_compat = NULL;
static unsigned long *m2p_array_compat = NULL;
static unsigned long pages_offset_compat;
-static unsigned long cr3_compat[MAX_VIRT_CPUS];
/* --------------------- */
static void *
-map_domain_va_core_compat(unsigned long domfd, int cpu, void *guest_va,
- vcpu_guest_context_t *ctxt)
+map_domain_va_core_compat(unsigned long domfd, int cpu, void *guest_va)
{
unsigned long pde, page;
unsigned long va = (unsigned long)guest_va;
void *v;
- static unsigned long cr3_phys[MAX_VIRT_CPUS];
- static unsigned long *cr3_virt[MAX_VIRT_CPUS];
- static unsigned long pde_phys[MAX_VIRT_CPUS];
- static unsigned long *pde_virt[MAX_VIRT_CPUS];
- static unsigned long page_phys[MAX_VIRT_CPUS];
- static unsigned long *page_virt[MAX_VIRT_CPUS];
-
- if (cr3_compat[cpu] != cr3_phys[cpu])
+ if (cr3[cpu] != cr3_phys[cpu])
{
- cr3_phys[cpu] = cr3_compat[cpu];
+ cr3_phys[cpu] = cr3[cpu];
if (cr3_virt[cpu])
munmap(cr3_virt[cpu], PAGE_SIZE);
v = mmap(
map_mtop_offset_compat(page_phys[cpu]));
if (v == MAP_FAILED)
{
- IPRINTF("cr3 %lx pde %lx page %lx pti %lx\n", cr3_compat[cpu], pde, page, l1_table_offset_i386(va));
+ IPRINTF("cr3 %lx pde %lx page %lx pti %lx\n", cr3[cpu], pde, page, l1_table_offset_i386(va));
page_phys[cpu] = 0;
return NULL;
}
int xc_handle,
int domfd,
int *status,
- int options,
- vcpu_guest_context_t *ctxt)
+ int options)
{
int nr_vcpus;
int i;
+ vcpu_guest_context_t *ctxt;
xc_core_header_t header;
if ( nr_pages_compat == 0 )
nr_vcpus = header.xch_nr_vcpus;
pages_offset_compat = header.xch_pages_offset;
+ if ((ctxt = ptrace_core_get_vcpu_ctxt(nr_vcpus)) == NULL)
+ {
+ IPRINTF("Could not allocate vcpu context array\n");
+ return -1;
+ }
+
if (read(domfd, ctxt, sizeof(vcpu_guest_context_t)*nr_vcpus) !=
sizeof(vcpu_guest_context_t)*nr_vcpus)
return -1;
for (i = 0; i < nr_vcpus; i++)
- cr3_compat[i] = ctxt[i].ctrlreg[3];
+ cr3[i] = ctxt[i].ctrlreg[3];
if ((p2m_array_compat = malloc(nr_pages_compat * sizeof(unsigned long))) == NULL)
{
static uint64_t pfn_array_size = 0;
static long nr_pages = 0;
static uint64_t pages_offset;
-static unsigned long cr3[MAX_VIRT_CPUS];
static const struct xen_dumpcore_elfnote_format_version_desc
known_format_version[] =
}
static void *
-map_domain_va_core_elf(unsigned long domfd, int cpu, void *guest_va,
- vcpu_guest_context_t *ctxt)
+map_domain_va_core_elf(unsigned long domfd, int cpu, void *guest_va)
{
unsigned long pde, page;
unsigned long va = (unsigned long)guest_va;
unsigned long offset;
void *v;
- static unsigned long cr3_phys[MAX_VIRT_CPUS];
- static unsigned long *cr3_virt[MAX_VIRT_CPUS];
- static unsigned long pde_phys[MAX_VIRT_CPUS];
- static unsigned long *pde_virt[MAX_VIRT_CPUS];
- static unsigned long page_phys[MAX_VIRT_CPUS];
- static unsigned long *page_virt[MAX_VIRT_CPUS];
-
if (cr3[cpu] != cr3_phys[cpu])
{
if (cr3_virt[cpu])
int xc_handle,
int domfd,
int *status,
- int options,
- vcpu_guest_context_t *ctxt)
+ int options)
{
int i;
+ vcpu_guest_context_t *ctxt;
struct elf_core ecore;
struct xen_dumpcore_elfnote_none *none;
if ((header->header.xch_magic != XC_CORE_MAGIC &&
header->header.xch_magic != XC_CORE_MAGIC_HVM) ||
header->header.xch_nr_vcpus == 0 ||
- header->header.xch_nr_vcpus >= MAX_VIRT_CPUS ||
header->header.xch_nr_pages == 0 ||
header->header.xch_page_size != PAGE_SIZE)
goto out;
current_is_auto_translated_physmap =
(header->header.xch_magic == XC_CORE_MAGIC_HVM);
nr_pages = header->header.xch_nr_pages;
/* .note.Xen: xen_version */
if (elf_core_search_note(&ecore, XEN_DUMPCORE_ELFNOTE_NAME,
XEN_ELFNOTE_DUMPCORE_XEN_VERSION,
format_version->format_version.version);
}
+ if ((ctxt = ptrace_core_get_vcpu_ctxt(header->header.xch_nr_vcpus)) == NULL)
+ goto out;
+
/* .xen_prstatus: read vcpu_guest_context_t*/
if (elf_core_read_sec_by_name(&ecore, XEN_DUMPCORE_SEC_PRSTATUS,
(char*)ctxt) < 0)
typedef int (*xc_waitdomain_core_t)(int xc_handle,
int domfd,
int *status,
- int options,
- vcpu_guest_context_t *ctxt);
+ int options);
typedef void *(*map_domain_va_core_t)(unsigned long domfd,
int cpu,
- void *guest_va,
- vcpu_guest_context_t *ctxt);
+ void *guest_va);
struct xc_core_format_type {
xc_waitdomain_core_t waitdomain_core;
map_domain_va_core_t map_domain_va_core;
static const struct xc_core_format_type* current_format_type = NULL;
void *
-map_domain_va_core(unsigned long domfd, int cpu, void *guest_va,
- vcpu_guest_context_any_t *ctxt)
+map_domain_va_core(unsigned long domfd, int cpu, void *guest_va)
{
if (current_format_type == NULL)
return NULL;
- return (current_format_type->map_domain_va_core)(domfd, cpu, guest_va,
- &ctxt->c);
+ return (current_format_type->map_domain_va_core)(domfd, cpu, guest_va);
}
int
-xc_waitdomain_core(int xc_handle, int domfd, int *status, int options,
- vcpu_guest_context_any_t *ctxt)
+xc_waitdomain_core(int xc_handle, int domfd, int *status, int options)
{
int ret;
int i;
for (i = 0; i < NR_FORMAT_TYPE; i++) {
ret = (format_type[i].waitdomain_core)(xc_handle, domfd, status,
- options, &ctxt->c);
+ options);
if (ret == 0) {
current_format_type = &format_type[i];
break;
#include "ia64/xc_dom_ia64_util.h"
#define ia64_fc(addr) asm volatile ("fc %0" :: "r"(addr) : "memory")
+#define MAX_VIRT_CPUS XEN_LEGACY_MAX_VCPUS /* XXX */
#endif /* __XEN__ */
#include <xen/acpi.h>
for ( i = 1; i < MAX_VIRT_CPUS; i++ )
d->shared_info->vcpu_info[i].evtchn_upcall_mask = 1;
- if (dom0_max_vcpus == 0)
- dom0_max_vcpus = MAX_VIRT_CPUS;
- if (dom0_max_vcpus > num_online_cpus())
- dom0_max_vcpus = num_online_cpus();
- if (dom0_max_vcpus > MAX_VIRT_CPUS)
- dom0_max_vcpus = MAX_VIRT_CPUS;
-
printk ("Dom0 max_vcpus=%d\n", dom0_max_vcpus);
for ( i = 1; i < dom0_max_vcpus; i++ )
if (alloc_vcpu(d, i, i) == NULL)
return 0;
}
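+/* Dom0's vcpu pointer array is now allocated here, sized by dom0_max_vcpus,
+ * before vcpu 0 itself is created. */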
+struct vcpu *__init alloc_dom0_vcpu0(void)
+{
+ if (dom0_max_vcpus == 0)
+ dom0_max_vcpus = MAX_VIRT_CPUS;
+ if (dom0_max_vcpus > num_online_cpus())
+ dom0_max_vcpus = num_online_cpus();
+ if (dom0_max_vcpus > MAX_VIRT_CPUS)
+ dom0_max_vcpus = MAX_VIRT_CPUS;
+
+ dom0->vcpu = xmalloc_array(struct vcpu *, dom0_max_vcpus);
+ if ( !dom0->vcpu )
+ return NULL;
+ memset(dom0->vcpu, 0, dom0_max_vcpus * sizeof(*dom0->vcpu));
+ dom0->max_vcpus = dom0_max_vcpus;
+
+ return alloc_vcpu(dom0, 0, 0);
+}
+
void machine_restart(unsigned int delay_millisecs)
{
mdelay(delay_millisecs);
scheduler_init();
idle_vcpu[0] = (struct vcpu*) ia64_r13;
idle_domain = domain_create(IDLE_DOMAIN_ID, 0, 0);
- if ( (idle_domain == NULL) || (alloc_vcpu(idle_domain, 0, 0) == NULL) )
+ if ( idle_domain == NULL )
+ BUG();
+ idle_domain->vcpu = idle_vcpu;
+ idle_domain->max_vcpus = NR_CPUS;
+ if ( alloc_vcpu(idle_domain, 0, 0) == NULL )
BUG();
alloc_dom_xen_and_dom_io();
if (dom0 == NULL)
panic("Error creating domain 0\n");
domain_set_vhpt_size(dom0, dom0_vhpt_size_log2);
- dom0_vcpu0 = alloc_vcpu(dom0, 0, 0);
+ dom0_vcpu0 = alloc_dom0_vcpu0();
if (dom0_vcpu0 == NULL || vcpu_late_initialise(dom0_vcpu0) != 0)
panic("Cannot allocate dom0 vcpu 0\n");
d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 0;
- for ( vcpuid = 0; vcpuid < MAX_VIRT_CPUS; vcpuid++ )
+ for ( vcpuid = 0; vcpuid < d->max_vcpus; vcpuid++ )
{
if (d->vcpu[vcpuid])
release_compat_l4(d->vcpu[vcpuid]);
d->arch.is_32bit_pv = d->arch.has_32bit_shinfo = 1;
- for ( vcpuid = 0; vcpuid < MAX_VIRT_CPUS; vcpuid++ )
+ for ( vcpuid = 0; vcpuid < d->max_vcpus; vcpuid++ )
{
if ( (d->vcpu[vcpuid] != NULL) &&
(setup_compat_l4(d->vcpu[vcpuid]) != 0) )
#else /* __x86_64__ */
- d->arch.mm_perdomain_pt_pages = xmalloc_array(struct page_info *,
- PDPT_L2_ENTRIES);
- if ( !d->arch.mm_perdomain_pt_pages )
+ BUILD_BUG_ON(PDPT_L2_ENTRIES * sizeof(*d->arch.mm_perdomain_pt_pages)
+ != PAGE_SIZE);
+ pg = alloc_domheap_page(NULL, MEMF_node(domain_to_node(d)));
+ if ( !pg )
goto fail;
- memset(d->arch.mm_perdomain_pt_pages, 0,
- PDPT_L2_ENTRIES * sizeof(*d->arch.mm_perdomain_pt_pages));
+ d->arch.mm_perdomain_pt_pages = page_to_virt(pg);
+ clear_page(d->arch.mm_perdomain_pt_pages);
pg = alloc_domheap_page(NULL, MEMF_node(domain_to_node(d)));
if ( pg == NULL )
free_domheap_page(virt_to_page(d->arch.mm_perdomain_l2));
if ( d->arch.mm_perdomain_l3 )
free_domheap_page(virt_to_page(d->arch.mm_perdomain_l3));
- xfree(d->arch.mm_perdomain_pt_pages);
+ if ( d->arch.mm_perdomain_pt_pages )
+ free_domheap_page(virt_to_page(d->arch.mm_perdomain_pt_pages));
#else
free_xenheap_pages(d->arch.mm_perdomain_pt, pdpt_order);
#endif
if ( perdomain_pt_page(d, i) )
free_domheap_page(perdomain_pt_page(d, i));
}
- xfree(d->arch.mm_perdomain_pt_pages);
+ free_domheap_page(virt_to_page(d->arch.mm_perdomain_pt_pages));
free_domheap_page(virt_to_page(d->arch.mm_perdomain_l2));
free_domheap_page(virt_to_page(d->arch.mm_perdomain_l3));
#endif
new_info = (vcpu_info_t *)(mapping + offset);
- memcpy(new_info, v->vcpu_info, sizeof(*new_info));
+ if ( v->vcpu_info )
+ memcpy(new_info, v->vcpu_info, sizeof(*new_info));
+ else
+ {
+ memset(new_info, 0, sizeof(*new_info));
+ __vcpu_info(v, new_info, evtchn_upcall_mask) = 1;
+ }
v->vcpu_info = new_info;
v->arch.vcpu_info_mfn = mfn;
}
custom_param("dom0_mem", parse_dom0_mem);
-static unsigned int opt_dom0_max_vcpus;
+static unsigned int __initdata opt_dom0_max_vcpus;
integer_param("dom0_max_vcpus", opt_dom0_max_vcpus);
+struct vcpu *__init alloc_dom0_vcpu0(void)
+{
+ if ( opt_dom0_max_vcpus == 0 )
+ opt_dom0_max_vcpus = num_online_cpus();
+ if ( opt_dom0_max_vcpus > MAX_VIRT_CPUS )
+ opt_dom0_max_vcpus = MAX_VIRT_CPUS;
+
+ dom0->vcpu = xmalloc_array(struct vcpu *, opt_dom0_max_vcpus);
+ if ( !dom0->vcpu )
+ return NULL;
+ memset(dom0->vcpu, 0, opt_dom0_max_vcpus * sizeof(*dom0->vcpu));
+ dom0->max_vcpus = opt_dom0_max_vcpus;
+
+ return alloc_vcpu(dom0, 0, 0);
+}
+
static unsigned int opt_dom0_shadow;
boolean_param("dom0_shadow", opt_dom0_shadow);
#endif /* __x86_64__ */
/* Mask all upcalls... */
- for ( i = 0; i < MAX_VIRT_CPUS; i++ )
+ for ( i = 0; i < XEN_LEGACY_MAX_VCPUS; i++ )
shared_info(d, vcpu_info[i].evtchn_upcall_mask) = 1;
- if ( opt_dom0_max_vcpus == 0 )
- opt_dom0_max_vcpus = num_online_cpus();
- if ( opt_dom0_max_vcpus > MAX_VIRT_CPUS )
- opt_dom0_max_vcpus = MAX_VIRT_CPUS;
printk("Dom0 has maximum %u VCPUs\n", opt_dom0_max_vcpus);
for ( i = 1; i < opt_dom0_max_vcpus; i++ )
goto sendtrigger_out;
ret = -ESRCH;
- if ( (v = d->vcpu[domctl->u.sendtrigger.vcpu]) == NULL )
+ if ( domctl->u.sendtrigger.vcpu >= d->max_vcpus ||
+ (v = d->vcpu[domctl->u.sendtrigger.vcpu]) == NULL )
goto sendtrigger_out;
switch ( domctl->u.sendtrigger.trigger )
goto ext_vcpucontext_out;
ret = -ESRCH;
- if ( (evc->vcpu >= MAX_VIRT_CPUS) ||
+ if ( (evc->vcpu >= d->max_vcpus) ||
((v = d->vcpu[evc->vcpu]) == NULL) )
goto ext_vcpucontext_out;
break;
ret = -EINVAL;
- if ( (domctl->u.debug_op.vcpu >= MAX_VIRT_CPUS) ||
+ if ( (domctl->u.debug_op.vcpu >= d->max_vcpus) ||
((v = d->vcpu[domctl->u.debug_op.vcpu]) == NULL) )
goto debug_op_out;
/* Stop all asynchronous timer actions. */
rtc_deinit(d);
- if ( d->vcpu[0] != NULL )
+ if ( d->vcpu != NULL && d->vcpu[0] != NULL )
{
pit_deinit(d);
pmtimer_deinit(d);
/* Which vcpu is this? */
vcpuid = hvm_load_instance(h);
- if ( vcpuid >= MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL )
+ if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
{
gdprintk(XENLOG_ERR, "HVM restore: domain has no vcpu %u\n", vcpuid);
return -EINVAL;
domain_pause(d);
domain_lock(d);
- if ( d->is_dying || (d->vcpu[0] == NULL) ||
+ if ( d->is_dying || (d->vcpu == NULL) || (d->vcpu[0] == NULL) ||
test_and_set_bool(d->arch.hvm_domain.is_s3_suspended) )
{
domain_unlock(d);
goto param_fail2;
rc = -EINVAL;
- if ( d->vcpu[0] == NULL )
+ if ( d->vcpu == NULL || d->vcpu[0] == NULL )
goto param_fail2;
if ( shadow_mode_enabled(d) )
struct hvm_hw_mtrr hw_mtrr;
vcpuid = hvm_load_instance(h);
- if ( vcpuid >= MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL )
+ if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
{
gdprintk(XENLOG_ERR, "HVM restore: domain has no vcpu %u\n", vcpuid);
return -EINVAL;
*igmt = 0;
- if ( (current->domain != d) && ((v = d->vcpu[0]) == NULL) )
+ if ( (current->domain != d) &&
+ ((d->vcpu == NULL) || ((v = d->vcpu[0]) == NULL)) )
return MTRR_TYPE_WRBACK;
if ( !v->domain->arch.hvm_domain.params[HVM_PARAM_IDENT_PT] )
/* Force round-robin to pick VCPU 0 */
if ( (irq == hvm_isa_irq_to_gsi(0)) && pit_channel0_enabled() )
{
- v = vioapic_domain(vioapic)->vcpu[0];
+ v = vioapic_domain(vioapic)->vcpu ?
+ vioapic_domain(vioapic)->vcpu[0] : NULL;
target = v ? vcpu_vlapic(v) : NULL;
}
else
if ( !(deliver_bitmask & (1 << bit)) )
continue;
deliver_bitmask &= ~(1 << bit);
+ if ( vioapic_domain(vioapic)->vcpu == NULL )
+ v = NULL;
#ifdef IRQ0_SPECIAL_ROUTING
/* Do not deliver timer interrupts to VCPU != 0 */
- if ( (irq == hvm_isa_irq_to_gsi(0)) && pit_channel0_enabled() )
+ else if ( (irq == hvm_isa_irq_to_gsi(0)) && pit_channel0_enabled() )
v = vioapic_domain(vioapic)->vcpu[0];
- else
#endif
+ else
v = vioapic_domain(vioapic)->vcpu[bit];
if ( v != NULL )
{
if ( !(deliver_bitmask & (1 << bit)) )
continue;
deliver_bitmask &= ~(1 << bit);
- if ( ((v = vioapic_domain(vioapic)->vcpu[bit]) != NULL) &&
+ if ( (vioapic_domain(vioapic)->vcpu != NULL) &&
+ ((v = vioapic_domain(vioapic)->vcpu[bit]) != NULL) &&
!test_and_set_bool(v->nmi_pending) )
vcpu_kick(v);
}
struct vlapic *vlapic, *target = NULL;
struct vcpu *v;
- if ( unlikely((v = d->vcpu[old]) == NULL) )
+ if ( unlikely(!d->vcpu) || unlikely((v = d->vcpu[old]) == NULL) )
return NULL;
do {
/* Which vlapic to load? */
vcpuid = hvm_load_instance(h);
- if ( vcpuid >= MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL )
+ if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
{
gdprintk(XENLOG_ERR, "HVM restore: domain has no vlapic %u\n", vcpuid);
return -EINVAL;
/* Which vlapic to load? */
vcpuid = hvm_load_instance(h);
- if ( vcpuid >= MAX_VIRT_CPUS || (v = d->vcpu[vcpuid]) == NULL )
+ if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
{
gdprintk(XENLOG_ERR, "HVM restore: domain has no vlapic %u\n", vcpuid);
return -EINVAL;
void ept_sync_domain(struct domain *d)
{
/* Only if using EPT and this domain has some VCPUs to dirty. */
- if ( d->arch.hvm_domain.hap_enabled && d->vcpu[0] )
+ if ( d->arch.hvm_domain.hap_enabled && d->vcpu && d->vcpu[0] )
{
ASSERT(local_irq_is_enabled());
on_each_cpu(__ept_sync_domain, d, 1);
};
static unsigned long *vpid_bitmap;
-#define VPID_BITMAP_SIZE ((1u << VMCS_VPID_WIDTH) / MAX_VIRT_CPUS)
+#define VPID_BITMAP_SIZE ((1u << VMCS_VPID_WIDTH) / XEN_LEGACY_MAX_VCPUS)
void start_vmx(void)
{
}
while ( test_and_set_bit(idx, vpid_bitmap) );
- d->arch.hvm_domain.vmx.vpid_base = idx * MAX_VIRT_CPUS;
+ d->arch.hvm_domain.vmx.vpid_base = idx * XEN_LEGACY_MAX_VCPUS;
return 0;
}
if ( !cpu_has_vmx_vpid )
return;
- clear_bit(d->arch.hvm_domain.vmx.vpid_base / MAX_VIRT_CPUS, vpid_bitmap);
+ clear_bit(d->arch.hvm_domain.vmx.vpid_base / XEN_LEGACY_MAX_VCPUS,
+ vpid_bitmap);
}
static void vmx_install_vlapic_mapping(struct vcpu *v)
if ( vpic->is_master )
{
/* Master INT line is connected to VCPU0's VLAPIC LVT0. */
- struct vcpu *v = vpic_domain(vpic)->vcpu[0];
+ struct vcpu *v = vpic_domain(vpic)->vcpu ?
+ vpic_domain(vpic)->vcpu[0] : NULL;
+
if ( (v != NULL) && vlapic_accept_pic_intr(v) )
vcpu_kick(v);
}
*/
if ( (pfn >= 0x100000) &&
unlikely(!VM_ASSIST(d, VMASST_TYPE_pae_extended_cr3)) &&
- d->vcpu[0] && d->vcpu[0]->is_initialised )
+ d->vcpu && d->vcpu[0] && d->vcpu[0]->is_initialised )
{
MEM_LOG("PAE pgd must be below 4GB (0x%lx >= 0x100000)", pfn);
return -EINVAL;
for ( vmask = 0, offs = 0; ; ++offs)
{
vcpu_bias = offs * (is_native ? BITS_PER_LONG : 32);
- if ( vcpu_bias >= MAX_VIRT_CPUS )
+ if ( vcpu_bias >= d->max_vcpus )
return 0;
if ( unlikely(is_native ?
vcpu_id = find_first_set_bit(vmask);
vmask &= ~(1UL << vcpu_id);
vcpu_id += vcpu_bias;
- if ( (vcpu_id >= MAX_VIRT_CPUS) )
+ if ( (vcpu_id >= d->max_vcpus) )
return 0;
if ( ((v = d->vcpu[vcpu_id]) != NULL) )
cpus_or(*pmask, *pmask, v->vcpu_dirty_cpumask);
return 0;
}
- if ( unlikely(d->vcpu[0] == NULL) )
+ if ( unlikely(d->vcpu == NULL) || unlikely(d->vcpu[0] == NULL) )
{
PAGING_ERROR("Paging op on a domain (%u) with no vcpus\n",
d->domain_id);
void shadow_blow_tables_per_domain(struct domain *d)
{
- if ( shadow_mode_enabled(d) && d->vcpu[0] != NULL ) {
+ if ( shadow_mode_enabled(d) && d->vcpu != NULL && d->vcpu[0] != NULL ) {
shadow_lock(d);
shadow_blow_tables(d);
shadow_unlock(d);
rcu_read_lock(&domlist_read_lock);
for_each_domain(d)
{
- if ( shadow_mode_enabled(d) && d->vcpu[0] != NULL )
+ if ( shadow_mode_enabled(d) && d->vcpu != NULL && d->vcpu[0] != NULL )
{
shadow_lock(d);
shadow_blow_tables(d);
for_each_cpu ( i )
printk("%3d\t%3d\n", i, nmi_count(i));
- if ( ((d = dom0) == NULL) || ((v = d->vcpu[0]) == NULL) )
+ if ( ((d = dom0) == NULL) || (d->vcpu == NULL) ||
+ ((v = d->vcpu[0]) == NULL) )
return;
if ( v->nmi_pending || (v->trap_priority >= VCPU_TRAP_NMI) )
scheduler_init();
idle_domain = domain_create(IDLE_DOMAIN_ID, 0, 0);
- if ( (idle_domain == NULL) || (alloc_vcpu(idle_domain, 0, 0) == NULL) )
+ if ( idle_domain == NULL )
+ BUG();
+ idle_domain->vcpu = idle_vcpu;
+ idle_domain->max_vcpus = NR_CPUS;
+ if ( alloc_vcpu(idle_domain, 0, 0) == NULL )
BUG();
- set_current(idle_domain->vcpu[0]);
- idle_vcpu[0] = this_cpu(curr_vcpu) = current;
+ set_current(idle_vcpu[0]);
+ this_cpu(curr_vcpu) = current;
setup_idle_pagetable();
}
/* Create initial domain 0. */
dom0 = domain_create(0, DOMCRF_s3_integrity, DOM0_SSIDREF);
- if ( (dom0 == NULL) || (alloc_vcpu(dom0, 0, 0) == NULL) )
+ if ( (dom0 == NULL) || (alloc_dom0_vcpu0() == NULL) )
panic("Error creating domain 0\n");
dom0->is_privileged = 1;
{
struct domain *d = dom0;
- if ( (d == NULL) || (d->vcpu[0] == NULL) )
+ if ( (d == NULL) || (d->vcpu == NULL) || (d->vcpu[0] == NULL) )
return;
set_bit(reason_idx, nmi_reason(d));
struct trap_info *t;
BUG_ON(d == NULL);
- BUG_ON(vcpuid >= MAX_VIRT_CPUS);
+ BUG_ON(vcpuid >= d->max_vcpus);
/* Sanity check - XXX should be more fine grained. */
BUG_ON(trap_nr > TRAP_syscall);
struct softirq_trap *st;
BUG_ON(d == NULL);
- BUG_ON(vcpuid >= MAX_VIRT_CPUS);
+ BUG_ON(vcpuid >= d->max_vcpus);
v = d->vcpu[vcpuid];
switch (trap_nr) {
if ( (vcpuid < 0) || (vcpuid >= MAX_VIRT_CPUS) )
return -EINVAL;
- if ( (v = d->vcpu[vcpuid]) == NULL )
+ if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
return -ENOENT;
switch ( cmd )
{
struct vcpu *v;
- BUG_ON(d->vcpu[vcpu_id] != NULL);
+ BUG_ON((!is_idle_domain(d) || vcpu_id) && d->vcpu[vcpu_id]);
if ( (v = alloc_vcpu_struct()) == NULL )
return NULL;
v->runstate.state = RUNSTATE_offline;
v->runstate.state_entry_time = NOW();
set_bit(_VPF_down, &v->pause_flags);
- v->vcpu_info = (void *)&shared_info(d, vcpu_info[vcpu_id]);
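+ /* Only vcpu IDs below XEN_LEGACY_MAX_VCPUS have a slot in the legacy
+  * shared_info array; higher ones start out with no vcpu_info mapped. */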
+ if ( vcpu_id < XEN_LEGACY_MAX_VCPUS )
+ v->vcpu_info = (void *)&shared_info(d, vcpu_info[vcpu_id]);
}
if ( sched_init_vcpu(v, cpu_id) != 0 )
struct vcpu *alloc_idle_vcpu(unsigned int cpu_id)
{
- struct domain *d;
- struct vcpu *v;
- unsigned int vcpu_id = cpu_id % MAX_VIRT_CPUS;
-
- if ( (v = idle_vcpu[cpu_id]) != NULL )
- return v;
-
- d = (vcpu_id == 0) ?
- domain_create(IDLE_DOMAIN_ID, 0, 0) :
- idle_vcpu[cpu_id - vcpu_id]->domain;
- BUG_ON(d == NULL);
-
- v = alloc_vcpu(d, vcpu_id, cpu_id);
- idle_vcpu[cpu_id] = v;
-
- return v;
+ return idle_vcpu[cpu_id] ?: alloc_vcpu(idle_vcpu[0]->domain,
+ cpu_id, cpu_id);
}
static unsigned int extra_dom0_irqs, extra_domU_irqs = 8;
struct vcpu *v;
int i;
- for ( i = MAX_VIRT_CPUS-1; i >= 0; i-- )
+ for ( i = d->max_vcpus - 1; i >= 0; i-- )
{
if ( (v = d->vcpu[i]) == NULL )
continue;
/* Free page used by xen oprofile buffer. */
free_xenoprof_pages(d);
- for ( i = MAX_VIRT_CPUS-1; i >= 0; i-- )
+ for ( i = d->max_vcpus - 1; i >= 0; i-- )
if ( (v = d->vcpu[i]) != NULL )
free_vcpu_struct(v);
if ( (vcpuid < 0) || (vcpuid >= MAX_VIRT_CPUS) )
return -EINVAL;
- if ( (v = d->vcpu[vcpuid]) == NULL )
+ if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
return -ENOENT;
switch ( cmd )
{
case VCPUOP_initialise:
+ if ( !v->vcpu_info )
+ return -EINVAL;
+
if ( (ctxt = xmalloc(struct vcpu_guest_context)) == NULL )
return -ENOMEM;
ret = -EINVAL;
if ( (d == current->domain) || /* no domain_pause() */
- (vcpu >= MAX_VIRT_CPUS) || ((v = d->vcpu[vcpu]) == NULL) )
+ (vcpu >= d->max_vcpus) || ((v = d->vcpu[vcpu]) == NULL) )
goto svc_out;
if ( guest_handle_is_null(op->u.vcpucontext.ctxt) )
ret = -EINVAL;
if ( (d == current->domain) || /* no domain_pause() */
- (max > MAX_VIRT_CPUS) )
+ (max > MAX_VIRT_CPUS) ||
+ (is_hvm_domain(d) && max > XEN_LEGACY_MAX_VCPUS) )
{
rcu_unlock_domain(d);
break;
break;
}
+ /* Until Xenoprof can dynamically grow its vcpu array... */
+ if ( d->xenoprof )
+ {
+ rcu_unlock_domain(d);
+ ret = -EAGAIN;
+ break;
+ }
+
/* Needed, for example, to ensure writable p.t. state is synced. */
domain_pause(d);
/* We cannot reduce maximum VCPUs. */
ret = -EINVAL;
- if ( (max != MAX_VIRT_CPUS) && (d->vcpu[max] != NULL) )
+ if ( (max < d->max_vcpus) && (d->vcpu[max] != NULL) )
goto maxvcpu_out;
ret = -ENOMEM;
+ if ( max > d->max_vcpus )
+ {
+ struct vcpu **vcpus = xmalloc_array(struct vcpu *, max);
+ void *ptr;
+
+ if ( !vcpus )
+ goto maxvcpu_out;
+ memcpy(vcpus, d->vcpu, d->max_vcpus * sizeof(*vcpus));
+ memset(vcpus + d->max_vcpus, 0,
+ (max - d->max_vcpus) * sizeof(*vcpus));
+
+ ptr = d->vcpu;
+ d->vcpu = vcpus;
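+ /* Make the enlarged array visible before raising max_vcpus. */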
+ wmb();
+ d->max_vcpus = max;
+ xfree(ptr);
+ }
for ( i = 0; i < max; i++ )
{
if ( d->vcpu[i] != NULL )
goto vcpuaffinity_out;
ret = -EINVAL;
- if ( op->u.vcpuaffinity.vcpu >= MAX_VIRT_CPUS )
+ if ( op->u.vcpuaffinity.vcpu >= d->max_vcpus )
goto vcpuaffinity_out;
ret = -ESRCH;
goto getvcpucontext_out;
ret = -EINVAL;
- if ( op->u.vcpucontext.vcpu >= MAX_VIRT_CPUS )
+ if ( op->u.vcpucontext.vcpu >= d->max_vcpus )
goto getvcpucontext_out;
ret = -ESRCH;
goto getvcpuinfo_out;
ret = -EINVAL;
- if ( op->u.getvcpuinfo.vcpu >= MAX_VIRT_CPUS )
+ if ( op->u.getvcpuinfo.vcpu >= d->max_vcpus )
goto getvcpuinfo_out;
ret = -ESRCH;
if ( virq_is_global(virq) && (vcpu != 0) )
return -EINVAL;
- if ( (vcpu < 0) || (vcpu >= ARRAY_SIZE(d->vcpu)) ||
+ if ( (vcpu < 0) || (vcpu >= d->max_vcpus) ||
((v = d->vcpu[vcpu]) == NULL) )
return -ENOENT;
+ if ( unlikely(!v->vcpu_info) )
+ return -EAGAIN;
+
spin_lock(&d->event_lock);
if ( v->virq_to_evtchn[virq] != 0 )
int port, vcpu = bind->vcpu;
long rc = 0;
- if ( (vcpu < 0) || (vcpu >= ARRAY_SIZE(d->vcpu)) ||
+ if ( (vcpu < 0) || (vcpu >= d->max_vcpus) ||
(d->vcpu[vcpu] == NULL) )
return -ENOENT;
+ if ( unlikely(!d->vcpu[vcpu]->vcpu_info) )
+ return -EAGAIN;
+
spin_lock(&d->event_lock);
if ( (port = get_free_port(d)) < 0 )
}
/* Check if some VCPU might be polling for this event. */
- if ( likely(bitmap_empty(d->poll_mask, MAX_VIRT_CPUS)) )
+ if ( likely(bitmap_empty(d->poll_mask, d->max_vcpus)) )
return 0;
/* Wake any interested (or potentially interested) pollers. */
- for ( vcpuid = find_first_bit(d->poll_mask, MAX_VIRT_CPUS);
- vcpuid < MAX_VIRT_CPUS;
- vcpuid = find_next_bit(d->poll_mask, MAX_VIRT_CPUS, vcpuid+1) )
+ for ( vcpuid = find_first_bit(d->poll_mask, d->max_vcpus);
+ vcpuid < d->max_vcpus;
+ vcpuid = find_next_bit(d->poll_mask, d->max_vcpus, vcpuid+1) )
{
v = d->vcpu[vcpuid];
if ( ((v->poll_evtchn <= 0) || (v->poll_evtchn == port)) &&
ASSERT(virq_is_global(virq));
- if ( unlikely(d == NULL) )
+ if ( unlikely(d == NULL) || unlikely(d->vcpu == NULL) )
return;
v = d->vcpu[0];
struct evtchn *chn;
long rc = 0;
- if ( (vcpu_id >= ARRAY_SIZE(d->vcpu)) || (d->vcpu[vcpu_id] == NULL) )
+ if ( (vcpu_id >= d->max_vcpus) || (d->vcpu[vcpu_id] == NULL) )
return -ENOENT;
+ if ( unlikely(!d->vcpu[vcpu_id]->vcpu_info) )
+ return -EAGAIN;
+
spin_lock(&d->event_lock);
if ( !port_is_valid(d, port) )
struct domain *d = local_vcpu->domain;
int port;
+ if ( unlikely(!local_vcpu->vcpu_info) )
+ return -EAGAIN;
+
spin_lock(&d->event_lock);
if ( (port = get_free_port(d)) < 0 )
if ( get_free_port(d) != 0 )
return -EINVAL;
evtchn_from_port(d, 0)->state = ECS_RESERVED;
+
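+ /* The poll mask no longer fits in a single embedded long when
+  * MAX_VIRT_CPUS exceeds BITS_PER_LONG, so allocate it separately. */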
+#if MAX_VIRT_CPUS > BITS_PER_LONG
+ d->poll_mask = xmalloc_array(unsigned long, BITS_TO_LONGS(MAX_VIRT_CPUS));
+ if ( !d->poll_mask )
+ return -ENOMEM;
+ bitmap_zero(d->poll_mask, MAX_VIRT_CPUS);
+#endif
+
return 0;
}
d->evtchn[i] = NULL;
}
spin_unlock(&d->event_lock);
+
+#if MAX_VIRT_CPUS > BITS_PER_LONG
+ xfree(d->poll_mask);
+ d->poll_mask = NULL;
+#endif
}
static void domain_dump_evtchn_info(struct domain *d)
v->vcpu_id, v->processor,
v->is_running ? 'T':'F',
v->pause_flags, v->poll_evtchn,
- vcpu_info(v, evtchn_upcall_pending),
- vcpu_info(v, evtchn_upcall_mask));
+ v->vcpu_info ? vcpu_info(v, evtchn_upcall_pending) : 0,
+ v->vcpu_info ? vcpu_info(v, evtchn_upcall_mask) : 1);
cpuset_print(tmpstr, sizeof(tmpstr), v->vcpu_dirty_cpumask);
printk("dirty_cpus=%s ", tmpstr);
cpuset_print(tmpstr, sizeof(tmpstr), v->cpu_affinity);
arch_dump_vcpu_info(v);
periodic_timer_print(tmpstr, sizeof(tmpstr), v->periodic_period);
printk(" %s\n", tmpstr);
+ if ( !v->vcpu_info )
+ continue;
printk(" Notifying guest (virq %d, port %d, stat %d/%d/%d)\n",
VIRQ_DEBUG, v->virq_to_evtchn[VIRQ_DEBUG],
test_bit(v->virq_to_evtchn[VIRQ_DEBUG],
p->domain_id, op->u.sedf.period, op->u.sedf.slice,
op->u.sedf.latency, (op->u.sedf.extratime)?"yes":"no");
+ if ( !p->vcpu )
+ return -EINVAL;
+
if ( op->cmd == XEN_DOMCTL_SCHEDOP_putinfo )
{
/* Check for sane parameters. */
return;
}
- for ( j = 0; j < MAX_VIRT_CPUS; j++ )
+ for ( j = 0; j < d->max_vcpus; j++ )
{
buf = d->xenoprof->vcpu[j].buffer;
if ( buf != NULL )
memset(d->xenoprof, 0, sizeof(*d->xenoprof));
+ d->xenoprof->vcpu = xmalloc_array(struct xenoprof_vcpu, d->max_vcpus);
+ if ( d->xenoprof->vcpu == NULL )
+ {
+ xfree(d->xenoprof);
+ d->xenoprof = NULL;
+ printk("alloc_xenoprof_struct(): vcpu array allocation failed\n");
+ return -ENOMEM;
+ }
+
+ memset(d->xenoprof->vcpu, 0, d->max_vcpus * sizeof(*d->xenoprof->vcpu));
+
nvcpu = 0;
for_each_vcpu ( d, v )
nvcpu++;
#else
#define NR_CPUS 64
#endif
+#define MAX_VIRT_CPUS XEN_LEGACY_MAX_VCPUS
#define CONFIG_NUMA
#define CONFIG_ACPI_NUMA
#define NODES_SHIFT 8 /* linux/asm/numnodes.h */
#define NR_CPUS 32
#endif
-#if defined(__i386__) && (NR_CPUS > 32)
+#ifdef __i386__
+#if NR_CPUS > 32
#error "Maximum of 32 physical processors supported by Xen on x86_32"
#endif
+/* Maximum number of virtual CPUs in multi-processor guests. */
+#define MAX_VIRT_CPUS XEN_LEGACY_MAX_VCPUS
+#endif
#ifdef CONFIG_X86_SUPERVISOR_MODE_KERNEL
# define supervisor_mode_kernel (1)
/* Slot 260: per-domain mappings. */
#define PERDOMAIN_VIRT_START (PML4_ADDR(260))
#define PERDOMAIN_VIRT_END (PERDOMAIN_VIRT_START + (PERDOMAIN_MBYTES<<20))
-#define PERDOMAIN_MBYTES ((unsigned long)GDT_LDT_MBYTES)
+#define PERDOMAIN_MBYTES (PML4_ENTRY_BYTES >> (20 + PAGETABLE_ORDER))
/* Slot 261: machine-to-phys conversion table (16GB). */
#define RDWR_MPT_VIRT_START (PML4_ADDR(261))
#define RDWR_MPT_VIRT_END (RDWR_MPT_VIRT_START + (16UL<<30))
#define COMPAT_L2_PAGETABLE_XEN_SLOTS(d) \
(COMPAT_L2_PAGETABLE_LAST_XEN_SLOT - COMPAT_L2_PAGETABLE_FIRST_XEN_SLOT(d) + 1)
+#define COMPAT_LEGACY_MAX_VCPUS XEN_LEGACY_MAX_VCPUS
+
#endif
#define PGT_base_page_table PGT_l4_page_table
/* GDT/LDT shadow mapping area. The first per-domain-mapping sub-area. */
#define GDT_LDT_VCPU_SHIFT 5
#define GDT_LDT_VCPU_VA_SHIFT (GDT_LDT_VCPU_SHIFT + PAGE_SHIFT)
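+/*
+ * When the architecture does not fix MAX_VIRT_CPUS (x86-64), derive it from
+ * the space reserved for the per-domain GDT/LDT mappings instead.
+ */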
+#ifdef MAX_VIRT_CPUS
#define GDT_LDT_MBYTES (MAX_VIRT_CPUS >> (20-GDT_LDT_VCPU_VA_SHIFT))
+#else
+#define GDT_LDT_MBYTES PERDOMAIN_MBYTES
+#define MAX_VIRT_CPUS (GDT_LDT_MBYTES << (20-GDT_LDT_VCPU_VA_SHIFT))
+#endif
#define GDT_LDT_VIRT_START PERDOMAIN_VIRT_START
#define GDT_LDT_VIRT_END (GDT_LDT_VIRT_START + (GDT_LDT_MBYTES << 20))
{
struct vcpu *v = current;
if ( v->domain != d )
- v = d->vcpu[0];
+ v = d->vcpu ? d->vcpu[0] : NULL;
if ( likely(v && paging_mode_enabled(d) && v->arch.paging.mode != NULL) )
{
return v->arch.paging.mode->write_p2m_entry(v, gfn, p, table_mfn,
#define GET_SET_VCPU(type, field) \
static inline type arch_get_##field(const struct vcpu *v) \
{ \
+ if ( unlikely(!v->vcpu_info) ) \
+ return 0; \
return !has_32bit_shinfo(v->domain) ? \
v->vcpu_info->native.arch.field : \
v->vcpu_info->compat.arch.field; \
#define GET_SET_VCPU(type, field) \
static inline type arch_get_##field(const struct vcpu *v) \
{ \
- return v->vcpu_info->arch.field; \
+ return v->vcpu_info ? v->vcpu_info->arch.field : 0; \
} \
static inline void arch_set_##field(struct vcpu *v, \
type val) \
/* Maximum number of virtual CPUs in multi-processor guests. */
/* WARNING: before changing this, check that shared_info fits on a page */
-#define MAX_VIRT_CPUS 64
+#define XEN_LEGACY_MAX_VCPUS 64
/* IO ports location for PV. */
#define IO_PORTS_PADDR 0x00000ffffc000000UL
#define FIRST_RESERVED_GDT_BYTE (FIRST_RESERVED_GDT_PAGE * 4096)
#define FIRST_RESERVED_GDT_ENTRY (FIRST_RESERVED_GDT_BYTE / 8)
-/* Maximum number of virtual CPUs in multi-processor guests. */
-#define MAX_VIRT_CPUS 32
+/* Maximum number of virtual CPUs in legacy multi-processor guests. */
+#define XEN_LEGACY_MAX_VCPUS 32
#ifndef __ASSEMBLY__
* of this structure remaining constant.
*/
struct shared_info {
- struct vcpu_info vcpu_info[MAX_VIRT_CPUS];
+ struct vcpu_info vcpu_info[XEN_LEGACY_MAX_VCPUS];
/*
* A domain can create "event channels" on which it can send and receive
int boot_vcpu(
struct domain *d, int vcpuid, vcpu_guest_context_u ctxt);
struct vcpu *alloc_idle_vcpu(unsigned int cpu_id);
+struct vcpu *alloc_dom0_vcpu0(void);
void vcpu_reset(struct vcpu *v);
struct xen_domctl_getdomaininfo;
#define vcpu_to_node(v) (cpu_to_node((v)->processor))
#define domain_to_node(d) \
- (((d)->vcpu[0] != NULL) ? vcpu_to_node((d)->vcpu[0]) : NUMA_NO_NODE)
+ (((d)->vcpu != NULL && (d)->vcpu[0] != NULL) \
+ ? vcpu_to_node((d)->vcpu[0]) : NUMA_NO_NODE)
#endif /* _XEN_NUMA_H */
unsigned int max_pages; /* maximum value for tot_pages */
unsigned int xenheap_pages; /* # pages allocated from Xen heap */
+ unsigned int max_vcpus;
+
/* Scheduling. */
void *sched_priv; /* scheduler-specific data */
bool_t is_pinned;
/* Are any VCPUs polling event channels (SCHEDOP_poll)? */
+#if MAX_VIRT_CPUS <= BITS_PER_LONG
DECLARE_BITMAP(poll_mask, MAX_VIRT_CPUS);
+#else
+ unsigned long *poll_mask;
+#endif
/* Guest has shut down (inc. reason code)? */
spinlock_t shutdown_lock;
atomic_t refcnt;
- struct vcpu *vcpu[MAX_VIRT_CPUS];
+ struct vcpu **vcpu;
/* Bitmask of CPUs which are holding onto this domain's state. */
cpumask_t domain_dirty_cpumask;
(_d) = rcu_dereference((_d)->next_in_list )) \
#define for_each_vcpu(_d,_v) \
- for ( (_v) = (_d)->vcpu[0]; \
+ for ( (_v) = (_d)->vcpu ? (_d)->vcpu[0] : NULL; \
(_v) != NULL; \
(_v) = (_v)->next_in_list )
(*(!has_32bit_shinfo(d) ? \
(typeof(&(s)->compat.field))&(s)->native.field : \
(typeof(&(s)->compat.field))&(s)->compat.field))
-#define shared_info(d, field) \
- __shared_info(d, (d)->shared_info, field)
typedef union {
struct vcpu_info native;
} vcpu_info_t;
/* As above, cast to compat field type. */
-#define vcpu_info(v, field) \
- (*(!has_32bit_shinfo((v)->domain) ? \
- (typeof(&(v)->vcpu_info->compat.field))&(v)->vcpu_info->native.field : \
- (typeof(&(v)->vcpu_info->compat.field))&(v)->vcpu_info->compat.field))
+#define __vcpu_info(v, i, field) \
+ (*(!has_32bit_shinfo((v)->domain) ? \
+ (typeof(&(i)->compat.field))&(i)->native.field : \
+ (typeof(&(i)->compat.field))&(i)->compat.field))
#else
typedef struct shared_info shared_info_t;
-#define shared_info(d, field) ((d)->shared_info->field)
+#define __shared_info(d, s, field) ((s)->field)
typedef struct vcpu_info vcpu_info_t;
-#define vcpu_info(v, field) ((v)->vcpu_info->field)
+#define __vcpu_info(v, i, field) ((i)->field)
#endif
+#define shared_info(d, field) __shared_info(d, (d)->shared_info, field)
+#define vcpu_info(v, field) __vcpu_info(v, (v)->vcpu_info, field)
+
#endif /* __XEN_SHARED_H__ */
#ifdef CONFIG_COMPAT
int is_compat;
#endif
- struct xenoprof_vcpu vcpu [MAX_VIRT_CPUS];
+ struct xenoprof_vcpu *vcpu;
};
#ifndef CONFIG_COMPAT